# I load the needed libraries
library(dplyr)
library(scales)
library(GoFKernel)
library(mvtnorm)
library(gplots)
# NOTE(review): warn = -1 silences ALL warnings globally for the whole session;
# consider wrapping only the specific noisy calls in suppressWarnings() instead.
options(warn=-1)
Attaching package: ‘dplyr’
The following objects are masked from ‘package:stats’:
filter, lag
The following objects are masked from ‘package:base’:
intersect, setdiff, setequal, union
Loading required package: KernSmooth
KernSmooth 2.23 loaded
Copyright M. P. Wand 1997-2009
Attaching package: ‘gplots’
The following object is masked from ‘package:stats’:
lowess
I load the functions from the class file:
source("class_MCMC.R")
I then define the target function that I want to sample with the MCMCs:
# 5D distribution
# 5D target density (unnormalized): independent Gaussian-shaped factors in
# dimensions 1, 2, 4 and 5, and a bimodal mixture of two Cauchy densities in
# dimension 3.
#
# @param theta Numeric vector of length 5, the point at which to evaluate.
# @return A single non-negative numeric value (unnormalized density).
#
# NOTE(review): the Gaussian factors use exp(-(x - c)^2 / s^2), i.e. no 1/2 in
# the exponent; kept as-is since the target is unnormalized anyway.
gauss2_cauchy1_gauss2 <- function(theta) {
  sigmas  <- c(2.5, 4.3, 0, 3.5, 5)   # entry 3 unused (Cauchy dimension)
  centers <- c(0.4, 9, 0, -4.7, 2.9)  # entry 3 unused (Cauchy dimension)
  gauss_dims <- c(1, 2, 4, 5)
  # Product of the four Gaussian-shaped factors, vectorized over dimensions
  # (replaces the original pair of scalar for-loops over 1:2 and 4:5)
  gauss_part <- prod(exp(-(theta[gauss_dims] - centers[gauss_dims])^2 /
                           sigmas[gauss_dims]^2))
  # Bimodal factor in dimension 3: two Cauchy modes at -10 and +10
  cauchy_part <- dcauchy(theta[3], -10, 2) + 4 * dcauchy(theta[3], 10, 4)
  gauss_part * cauchy_part
}
chosen_function <- gauss2_cauchy1_gauss2
Next, I only have to set the initialization parameters — the "hyperparameters" of the simulations.
# The initial parameters are:
init <- c(-4, -8, 12, 5, 3)  # starting point of the chains
std <- diag(1, 5)            # proposal covariance: 5x5 identity
N <- 100000L                 # number of post-burn-in samples
burn_in <- 10000L            # samples discarded as burn-in
print_step <- 1000L          # step used by show_results for plotting
# print_init = as.integer(1e3)
N_tot <- N + burn_in         # total chain length including burn-in
# For Haario:
epsilon <- 0.001             # regularization for the adaptive covariance
# MVTNORM
# Evaluate then the MCMC
mcmc_g <- random_steps_mvtnorm(func_wanted = chosen_function, theta_init = init,
                               n_samples = N_tot, sigma = std, print_accept = TRUE)
# Selecting the sequence after the burn-in: rows (burn_in + 1)..N_tot are the
# N post-burn-in samples. (The original `burn_in:N` kept one burn-in row and
# dropped the last burn_in post-burn-in rows.)
mcmc_g <- mcmc_g[(burn_in + 1):N_tot, ]
# Plotting the results
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 69.58636 %
# MVTNORM GIBBS
mcmc_g <- random_steps_mvtnorm_gibbs(func_wanted = chosen_function, theta_init = init,
                                     n_samples = N_tot, sigma = std, print_accept = TRUE)
# Keep the N post-burn-in rows (fixes the off-by-one `burn_in:N` slice)
mcmc_g <- mcmc_g[(burn_in + 1):N_tot, ]
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 83.674 %
# # SIMPLE ADAPTIVE
# mcmc_g = random_steps_simple (func_wanted = chosen_function, theta_init = init, n_samples = N_tot, sigma = std, print_accept=TRUE, t_0 = burn_in,
# gamma_function = gamma_series_exp, halved_step = burn_in)
# mcmc_g = mcmc_g[burn_in:N, ]
# show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
# # SIMPLE ADAPTIVE GIBBS
# mcmc_g = random_steps_simple_gibbs (func_wanted = chosen_function, theta_init = init, n_samples = N_tot, sigma = std, print_accept=TRUE, t_0 = burn_in,
# gamma_function = gamma_series_exp, halved_step = burn_in)
# mcmc_g = mcmc_g[burn_in:N, ]
# show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
# HAARIO
mcmc_g <- random_steps_haario(func_wanted = chosen_function, theta_init = init,
                              n_samples = N_tot, sigma = std, print_accept = TRUE,
                              t_0 = burn_in, eps = epsilon)
# Keep the N post-burn-in rows (fixes the off-by-one `burn_in:N` slice)
mcmc_g <- mcmc_g[(burn_in + 1):N_tot, ]
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 15.17909 %
Final mean = 0.3388725 9.061904 5.452783 -4.692806 2.869167
Final covariance matrix =
[,1] [,2] [,3] [,4] [,5]
[1,] 3.941854 6.964607 4.472432 -3.587848 2.218437
[2,] 6.964607 179.639923 110.977836 -86.674719 53.953638
[3,] 4.472432 110.977836 431.190118 -56.364764 34.798128
[4,] -3.587848 -86.674719 -56.364764 51.257229 -27.351365
[5,] 2.218437 53.953638 34.798128 -27.351365 31.260703
# HAARIO GIBBS
mcmc_g <- random_steps_haario_gibbs(func_wanted = chosen_function, theta_init = init,
                                    n_samples = N_tot, sigma = std, print_accept = TRUE,
                                    t_0 = burn_in, eps = epsilon)
# Keep the N post-burn-in rows (fixes the off-by-one `burn_in:N` slice)
mcmc_g <- mcmc_g[(burn_in + 1):N_tot, ]
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 29.43836 %
Final mean = 0.4110972 8.971942 6.058225 -4.705823 2.829803
Final covariance matrix =
[,1] [,2] [,3] [,4] [,5]
[1,] 4.484993 7.824889 3.002182 -4.162662 2.378963
[2,] 7.824889 175.363878 73.837647 -86.635749 50.293799
[3,] 3.002182 73.837647 510.952223 -37.901617 21.128591
[4,] -4.162662 -86.635749 -37.901617 50.818070 -26.350622
[5,] 2.378963 50.293799 21.128591 -26.350622 26.437705
# RAO
mcmc_g <- random_steps_AM_rao(func_wanted = chosen_function, theta_init = init,
                              n_samples = N_tot, sigma = std, print_accept = TRUE,
                              t_0 = burn_in, gamma_function = gamma_series_exp,
                              halved_step = burn_in / 2)
# Keep the N post-burn-in rows (fixes the off-by-one `burn_in:N` slice)
mcmc_g <- mcmc_g[(burn_in + 1):N_tot, ]
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 35.77545 %
Final mean = 0.4024645 9.052838 6.777664 -4.650735 2.824813
Final covariance matrix =
[,1] [,2] [,3] [,4] [,5]
[1,] 3.167091102 -0.05633307 0.8309130 0.001863827 0.05999030
[2,] -0.056333066 8.98232146 2.4070662 -0.310535263 0.03024013
[3,] 0.830912973 2.40706618 753.7247179 0.160558312 -1.73876596
[4,] 0.001863827 -0.31053526 0.1605583 6.541868754 -0.12779801
[5,] 0.059990303 0.03024013 -1.7387660 -0.127798010 12.30369110
# RAO GIBBS
mcmc_g <- random_steps_AM_rao_gibbs(func_wanted = chosen_function, theta_init = init,
                                    n_samples = N_tot, sigma = std, print_accept = TRUE,
                                    t_0 = burn_in, gamma_function = gamma_series_exp,
                                    halved_step = burn_in / 2)
# Keep the N post-burn-in rows (fixes the off-by-one `burn_in:N` slice)
mcmc_g <- mcmc_g[(burn_in + 1):N_tot, ]
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 52.59309 %
Final mean = 0.4217452 9.05701 4.860328 -4.706349 2.875816
Final covariance matrix =
[,1] [,2] [,3] [,4] [,5]
[1,] 4.30027097 -0.01056983 -0.8692743 -0.01539345 0.16112377
[2,] -0.01056983 8.19416717 0.9506353 0.11943763 0.05213324
[3,] -0.86927431 0.95063525 314.0543827 -0.12459672 0.52865042
[4,] -0.01539345 0.11943763 -0.1245967 4.88195844 -0.14084759
[5,] 0.16112377 0.05213324 0.5286504 -0.14084759 10.17053325
# GLOBAL
# NOTE: the starting point is changed here for this and the following run
init <- c(0, 8, 6, -4, 3)
mcmc_g <- random_steps_global(func_wanted = chosen_function, theta_init = init,
                              n_samples = N_tot, sigma = std, print_accept = TRUE,
                              t_0 = burn_in, gamma_function = gamma_series_exp,
                              halved_step = burn_in)
# Keep the N post-burn-in rows (fixes the off-by-one `burn_in:N` slice)
mcmc_g <- mcmc_g[(burn_in + 1):N_tot, ]
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 47.50909 %
Final mean = 0.2254765 8.986283 10.29214 -4.971612 3.230204
Final lambda = -0.1534902
Final covariance matrix =
[,1] [,2] [,3] [,4] [,5]
[1,] 2.8664616 -0.4121207 3.713921 -0.9367729 -0.2330260
[2,] -0.4121207 8.1273072 -11.892169 -0.2436894 -0.2054130
[3,] 3.7139208 -11.8921692 900.656841 3.0374130 -2.2751198
[4,] -0.9367729 -0.2436894 3.037413 6.2238421 -0.4996665
[5,] -0.2330260 -0.2054130 -2.275120 -0.4996665 12.3303973
# GLOBAL GIBBS
mcmc_g <- random_steps_global_gibbs(func_wanted = chosen_function, theta_init = init,
                                    n_samples = N_tot, sigma = std, print_accept = TRUE,
                                    t_0 = burn_in, gamma_function = gamma_series_exp,
                                    halved_step = burn_in)
# Keep the N post-burn-in rows (fixes the off-by-one `burn_in:N` slice)
mcmc_g <- mcmc_g[(burn_in + 1):N_tot, ]
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 70.65309 %
Final mean = 0.3369101 8.903986 6.561437 -4.650295 3.168858
Final lambda = -1.516926
Final covariance matrix =
[,1] [,2] [,3] [,4] [,5]
[1,] 4.526875692 -0.7520948 -0.01611936 0.1472590 -0.003124454
[2,] -0.752094845 8.8772369 7.71579389 -0.6055776 0.291074096
[3,] -0.016119356 7.7157939 198.37614137 -1.9320978 2.378454212
[4,] 0.147259030 -0.6055776 -1.93209776 5.7327101 0.178098090
[5,] -0.003124454 0.2910741 2.37845421 0.1780981 9.437931902